In [1]:
from util import *

import numpy as np
import pandas as pd
import string
import re
import matplotlib.pyplot as plt
%matplotlib inline

import plotly
from plotly import graph_objs
plotly.offline.init_notebook_mode()
from plotly.offline import iplot

import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from yellowbrick.text import FreqDistVisualizer
from yellowbrick.style import set_palette
set_palette('yellowbrick')
from wordcloud import WordCloud
from nltk.stem.porter import PorterStemmer
from textblob import TextBlob
from textblob import Word

from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, precision_score, recall_score, f1_score, roc_auc_score, classification_report,balanced_accuracy_score, precision_recall_curve
from sklearn.metrics import confusion_matrix

import seaborn as sn

Import Dataset¶

In [2]:
df = pd.read_csv('labeled_data.csv')
df.head()
Out[2]:
Unnamed: 0 count hate_speech offensive_language neither class tweet
0 0 3 0 0 3 2 !!! RT @mayasolovely: As a woman you shouldn't...
1 1 3 0 3 0 1 !!!!! RT @mleew17: boy dats cold...tyga dwn ba...
2 2 3 0 3 0 1 !!!!!!! RT @UrKindOfBrand Dawg!!!! RT @80sbaby...
3 3 3 0 2 1 1 !!!!!!!!! RT @C_G_Anderson: @viva_based she lo...
4 4 6 0 6 0 1 !!!!!!!!!!!!! RT @ShenikaRoberts: The shit you...
In [3]:
df['class'].value_counts()
Out[3]:
class
1    19190
2     4163
0     1430
Name: count, dtype: int64
In [4]:
hate = len(df[df['class']==0])
neutral = len(df[df['class']==2])
offensive = len(df[df['class']==1])
print(f'hate : {hate} , neutral : {neutral}, offensive : {offensive}')

dist = [graph_objs.Bar(
    x=['hate','offensive','neutral'],
    y=[hate, offensive, neutral],
)]
plotly.offline.iplot({'data': dist, 'layout': graph_objs.Layout(title='Class Distribution Visualisation')})
hate : 1430 , neutral : 4163, offensive : 19190

Data Preprocessing¶

In [5]:
"""
    Remove Unnamed column from dataframe, 
    Keep only two classes hate_speech with class label 1 and combine offensive and neutral with class label 0
"""
df = df.drop('Unnamed: 0', axis = 1)
df['class'] = df['class'].replace(2,1)
df['class'] = df['class'].replace([0,1],[1,0])
In [6]:
df.head()
Out[6]:
count hate_speech offensive_language neither class tweet
0 3 0 0 3 0 !!! RT @mayasolovely: As a woman you shouldn't...
1 3 0 3 0 0 !!!!! RT @mleew17: boy dats cold...tyga dwn ba...
2 3 0 3 0 0 !!!!!!! RT @UrKindOfBrand Dawg!!!! RT @80sbaby...
3 3 0 2 1 0 !!!!!!!!! RT @C_G_Anderson: @viva_based she lo...
4 6 0 6 0 0 !!!!!!!!!!!!! RT @ShenikaRoberts: The shit you...
In [7]:
df['class'].value_counts()
Out[7]:
class
0    23353
1     1430
Name: count, dtype: int64
In [8]:
preprocess_tweet(df, 'tweet')
In [9]:
df.head()
Out[9]:
count hate_speech offensive_language neither class tweet
0 3 0 0 3 0 as a woman you shouldnt complain about cleanin...
1 3 0 3 0 0 boy dats coldtyga dwn bad for cuffin dat hoe i...
2 3 0 3 0 0 dawg you ever fuck a bitch and she start to cr...
3 3 0 2 1 0 she look like a tranny
4 6 0 6 0 0 the shit you hear about me might be true or it...
In [10]:
df1 = df.drop(columns =['count', 'hate_speech', 'offensive_language', 'neither'])
df1.head()
Out[10]:
class tweet
0 0 as a woman you shouldnt complain about cleanin...
1 0 boy dats coldtyga dwn bad for cuffin dat hoe i...
2 0 dawg you ever fuck a bitch and she start to cr...
3 0 she look like a tranny
4 0 the shit you hear about me might be true or it...
In [11]:
df1= df1.iloc[:,[1,0]]
In [12]:
df1.head()
Out[12]:
tweet class
0 as a woman you shouldnt complain about cleanin... 0
1 boy dats coldtyga dwn bad for cuffin dat hoe i... 0
2 dawg you ever fuck a bitch and she start to cr... 0
3 she look like a tranny 0
4 the shit you hear about me might be true or it... 0

Splitting testing and training set¶

In [13]:
#for ease of use, changing column name from class to target
df1 = df1.rename(columns={'class': 'target'})
X = df1.tweet
y = df1.target

X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
In [14]:
train = pd.concat([X_tr, y_tr], axis=1).reset_index()
train = train.drop(columns=['index'], axis=1)
train.head()
Out[14]:
tweet target
0 well how else will white ppl get us to forget ... 1
1 funny thing isits not just the people doing it... 0
2 nigga messed with the wrong bitch 0
3 bitch ass nigggaaa 0
4 so that real bitch 0
In [15]:
val = pd.concat([X_val, y_val], axis=1).reset_index()
val = val.drop(columns=['index'], axis=1)

Visualizations¶

In [16]:
zero = train[train.target == 0]
one = train[train.target == 1]

zero_tokens = tokenize(zero, 'tweet')
one_tokens = tokenize(one, 'tweet')
zero_tokenz = no_stopwords(zero_tokens)
one_tokenz = no_stopwords(one_tokens)
In [17]:
plot_frequency_dist(zero_tokenz, 'Non-Hate')
/Users/manveerkaur/Documents/SentimentAnalysis/util.py:133: UserWarning:

Matplotlib is currently using module://matplotlib_inline.backend_inline, which is a non-GUI backend, so cannot show the figure.

In [18]:
plot_frequency_dist(one_tokenz, 'Hate')
In [19]:
## Generate and plot wordcloud of hate tweets
plot_wordCloud(one_tokenz)
In [20]:
## Generate and plot wordcloud of not-hate tweets
plot_wordCloud(zero_tokenz)

Vocabulary Unique to hate speech¶

In [21]:
hate_list = np.setdiff1d(one_tokenz, zero_tokenz)
hate_list
Out[21]:
array(['aa', 'absolved', 'accord', 'acknowledged', 'activity', 'aflcio',
       'aged', 'agg', 'ahhhahahaha', 'ahmed', 'airlines', 'aklve',
       'alaqsa', 'alcoholics', 'alls', 'alsarabsss', 'amazement',
       'americathey', 'amigo', 'anglo', 'anon', 'antiracist',
       'antisemite', 'antizionist', 'apartheid', 'appearance',
       'argentino', 'ariza', 'arkansas', 'aryan', 'aslina', 'attorney',
       'axin', 'azflooding', 'azmonsoon', 'backpedals', 'baiters', 'baka',
       'balless', 'ballless', 'banner', 'banwagoning', 'barge',
       'barnyard', 'bateman', 'batshit', 'bazinga', 'bdubs', 'beamthat',
       'beiber', 'believes', 'belton', 'benghazzi', 'benton', 'bernstine',
       'beta', 'bias', 'bibles', 'bidens', 'bikes', 'birthdayyyy',
       'bisexual', 'bitcheslook', 'blacklisted', 'blaspheme',
       'blondeproblems', 'boris', 'boyraping', 'brainwash', 'brainwashed',
       'bran', 'brits', 'bromance', 'broner', 'buckcity', 'buffets',
       'buku', 'bulldozed', 'bundle', 'butcountry', 'butthole',
       'buyfoodlittleguy', 'californias', 'cantstanducunt', 'capital',
       'carve', 'catholics', 'caused', 'causung', 'ceasefirelets',
       'celtic', 'cement', 'chava', 'chelsey', 'chimpout', 'chinatown',
       'ching', 'chong', 'chood', 'chromeasome', 'chu', 'chuu', 'chyna',
       'circulated', 'clans', 'clash', 'clashes', 'clones', 'clout',
       'cob', 'codeword', 'combined', 'complains', 'comthablesmh',
       'condone', 'conduct', 'confronts', 'connection', 'coulter',
       'cousintoucher', 'coworkeri', 'cracks', 'creation',
       'credibilityshot', 'criminally', 'crisco', 'crusader', 'cspan',
       'dammn', 'dans', 'darling', 'dds', 'dealcry', 'dealt', 'deedee',
       'deeds', 'deeeeaaaadd', 'deen', 'defence', 'delbert', 'democr',
       'deviancy', 'devin', 'dicklicker', 'dickwad', 'dietoday',
       'digital', 'dome', 'donts', 'doubles', 'doughnuts', 'downsize',
       'drakes', 'drreams', 'dryer', 'dss', 'dtla', 'ducked', 'duis',
       'dumby', 'ebloa', 'eda', 'enduring', 'engineering', 'enraged',
       'entertains', 'ep', 'escape', 'establishments', 'ethiopian',
       'evaaaa', 'everycunt', 'exact', 'explanation', 'faaaaggggottttt',
       'facedniggers', 'faggotsfag', 'fagjo', 'fagsplease', 'fairytale',
       'farmers', 'farrakhan', 'farve', 'fathom', 'faux', 'faves',
       'favorited', 'feminist', 'fergusonriot', 'fieldssuburbs',
       'fightpacquiao', 'fisted', 'fitz', 'flattering', 'flinched',
       'flopping', 'flowing', 'foolishness', 'forced', 'forsake', 'fredo',
       'fsu', 'fuckheads', 'fuckry', 'fudg', 'fuggin', 'furrybah',
       'gainz', 'ganks', 'gates', 'gayer', 'gaywad', 'gaywrites',
       'gazelles', 'gee', 'genetic', 'genos', 'gerryshalloweenparty',
       'gettingreal', 'gezus', 'girlboy', 'glitter', 'gobbling', 'goddam',
       'goddamit', 'goldbar', 'goper', 'grier', 'grilled', 'gusta',
       'gypsies', 'hahahahahahaha', 'hairstyle', 'haiti', 'halfbreeds',
       'hamster', 'happenings', 'happppppy', 'harassment', 'hayseed',
       'healedback', 'hebrew', 'heil', 'helpful', 'hesgay', 'hesitation',
       'hesters', 'heterosexual', 'heyyyyyyyyyyy', 'highlights', 'hindis',
       'hires', 'hiring', 'historically', 'hitched', 'hoesand', 'hoetru',
       'hollering', 'homewreckers', 'homophobic', 'honcho', 'honeybooboo',
       'honour', 'hoomie', 'horrific', 'hound', 'huff', 'hugging',
       'husbandry', 'hustlin', 'hypebeasts', 'ians', 'idfk', 'immoral',
       'immune', 'imperfections', 'inclined', 'increase', 'indentured',
       'indicator', 'indiviuals', 'infatuation', 'infest', 'infiltration',
       'influenced', 'injust', 'inspect', 'intvw', 'invites', 'inviting',
       'involve', 'islamnation', 'isolated', 'itwas', 'jackies', 'jai',
       'japped', 'japs', 'jennas', 'jerkin', 'jigaboos', 'jock', 'judged',
       'julie', 'jumpers', 'juvie', 'kakao', 'kamikaze', 'kbye',
       'kennies', 'kindergarden', 'knife', 'knob', 'knockdowns',
       'knooooooow', 'knowur', 'latinkings', 'leftisthomosexual',
       'leftists', 'legitimizing', 'lego', 'legshis', 'lexii', 'liberty',
       'lid', 'liesaboutvinscully', 'lifestyle', 'limelight', 'listeners',
       'looooool', 'losangeles', 'lotto', 'lrg', 'lucas', 'lustboy',
       'lynch', 'macs', 'madonnas', 'magazine', 'malt', 'manhood', 'mao',
       'maoists', 'mariachi', 'maryland', 'mayoral', 'mccartney', 'medal',
       'memphistn', 'merely', 'mernin', 'metlife', 'mexicannigger', 'mgr',
       'mideast', 'midlaner', 'midwest', 'migrating', 'mikey',
       'milesthompson', 'milwaukie', 'minorities', 'mirin', 'mischief',
       'misty', 'moccasin', 'mohamed', 'molester', 'mongerls', 'mongrels',
       'monkeys', 'monkies', 'moslems', 'mouthy', 'muzzy', 'naacp',
       'nahhhhhaahahahaha', 'nations', 'nazis', 'nbombs', 'nebraska',
       'neveraskablackperson', 'newyorkcity', 'nggas', 'nicely', 'niger',
       'niggass', 'niggerous', 'nigglets', 'niggress', 'nikejordan',
       'nochill', 'nonenglish', 'noneuropeans', 'nontraditional',
       'nonwhites', 'notices', 'ntx', 'nurturing', 'nws', 'obese', 'odb',
       'ofmine', 'okcupid', 'okiecops', 'okies', 'olympic', 'openwide',
       'oppressing', 'oppressive', 'orchids', 'osamas', 'ove', 'ovenjew',
       'overbreeding', 'overrun', 'oversensitive', 'panthers',
       'parenthetical', 'paypay', 'peasant', 'peckin', 'pedestrian',
       'peds', 'pennsylvanians', 'peoplehate', 'perish',
       'pgachampionship', 'phelps', 'phillip', 'phillips', 'phrase',
       'pickananny', 'pickers', 'picky', 'placement', 'placing', 'plant',
       'plantation', 'polynesians', 'pontiac', 'ponytails', 'porto',
       'pos', 'potheads', 'powered', 'premium', 'preparations',
       'prestigious', 'priesthood', 'printer', 'printers', 'propery',
       'proslavery', 'psychiatry', 'pundits', 'pussyed', 'pwi', 'queersi',
       'rabchenko', 'racismisaliveandwellbro', 'radical', 'ramlogan',
       'randos', 'rapists', 'rasta', 'reasonswecantbetogether',
       'receptionist', 'receptionthis', 'reconnaissance', 'recruited',
       'referred', 'refused', 'regionally', 'rejects', 'religions',
       'repping', 'reptile', 'reside', 'restau', 'retared', 'retweeettt',
       'rfn', 'rhode', 'ricans', 'roid', 'roleplayinggames', 'romeo',
       'route', 'salvadoran', 'samesex', 'sandusky', 'schitt', 'scope',
       'scully', 'segal', 'servant', 'sewer', 'sexist', 'shabbat',
       'sharpie', 'sheboons', 'ship', 'shock', 'shoving', 'sickening',
       'sidekicklike', 'sion', 'sistas', 'sixes', 'skater', 'skidmarks',
       'slightlyadjusted', 'slum', 'snipe', 'soetoroobama', 'sopa',
       'sophi', 'soxs', 'spaz', 'spicskkk', 'sprinkler', 'stacey',
       'stalkin', 'standn', 'stds', 'stephenking', 'stereotypi',
       'stoopid', 'stopsavinthesehoes', 'stu', 'stubborn', 'stuckup',
       'studies', 'styl', 'styles', 'subhuman', 'subordinate', 'summers',
       'suspicious', 'swaagg', 'swags', 'swill', 'sycksyllables',
       'tapout', 'taxing', 'teabagged', 'teabaggerswho', 'teammate',
       'teenage', 'tehgodclan', 'templars', 'terroristscongies',
       'texarkana', 'thenetherlands', 'therelike', 'thetime',
       'theyfaggots', 'thingsiwillteachmychild', 'thnk', 'timmys',
       'tittyy', 'tmt', 'toms', 'tomyfacebro', 'toosoon', 'traditions',
       'tragedy', 'trannygo', 'transformthursday', 'transmitter',
       'trashiest', 'trayvonmartin', 'trout', 'tsm', 'tunis', 'tunwhat',
       'tusks', 'tweetlikepontiacholmes', 'uf', 'units', 'unselfish',
       'unwashed', 'uwi', 'vaca', 'vanessa', 'vddie', 'vegasshowgirls',
       'vhia', 'vin', 'vinitahegwood', 'waahh', 'wacthh', 'wagging',
       'wallet', 'warehouse', 'warrior', 'weapon', 'wedges', 'weirdos',
       'welldid', 'wenchs', 'westvirginia', 'wher', 'whitepowerill',
       'whitest', 'whomp', 'whooooo', 'whse', 'wifebeater', 'willed',
       'wishywashy', 'witcho', 'woohoo', 'wooooow', 'worryol', 'wrongbut',
       'wrongwitch', 'yamming', 'yaselves', 'yeawhat', 'youuuuu', 'zak',
       'zigeuner', 'zion', 'zipperheads', 'zzzzzz'], dtype='<U23')
In [22]:
hate_tokenz = [x for x in one_tokenz if x in hate_list]
In [23]:
plot_frequency_dist(one_tokenz, "Hate (unique tokens)")
/Users/manveerkaur/Documents/SentimentAnalysis/util.py:133: UserWarning:

Matplotlib is currently using module://matplotlib_inline.backend_inline, which is a non-GUI backend, so cannot show the figure.

In [24]:
plot_wordCloud(hate_tokenz)

Stopword Removal, Tokenization, Stemming, and Lemmatization¶

In [25]:
train.tweet = train.tweet.apply(lambda x: re.sub(r'\b\w{1,2}\b', '', str(x)))
val.tweet = val.tweet.apply(lambda x: re.sub(r'\b\w{1,2}\b', '', str(x)))

train_tokens = tokenize(train, 'tweet')
val_tokens = tokenize(val, 'tweet')
train_tokenz = no_stopwords(train_tokens)
val_tokenz = no_stopwords(val_tokens)
In [26]:
stop_words = set(stopwords.words('english'))
stop_list = [''.join(c for c in s if c not in string.punctuation) for s in stop_words]
In [27]:
train_stem = stemming(train_tokenz)
val_stem = stemming(val_tokenz)
In [28]:
lemmatization(train)
lemmatization(val)
train.lem = train['lem'].apply(lambda x: ' '.join([item for item in x.split() if item not in stop_list]))
val.lem = val['lem'].apply(lambda x: ' '.join([item for item in x.split() if item not in stop_list]))
In [29]:
train.head()
Out[29]:
tweet target lem
0 well how else will white ppl get forget our ... 1 well else white ppl get forget horrific past p...
1 funny thing isits not just the people doing i... 0 funny thing isits people people seeing pic jud...
2 nigga messed with the wrong bitch 0 nigga messed wrong bitch
3 bitch ass nigggaaa 0 bitch nigggaaa
4 that real bitch 0 real bitch

Separating tweet and target for model¶

In [30]:
X_tr = train.lem
X_val = val.lem
y_tr = train.target
y_val = val.target

TF-IDF Vectorizer¶

In [31]:
vec = TfidfVectorizer()
tfidf_tr = vec.fit_transform(X_tr)
tfidf_val = vec.transform(X_val)

Multinomial Naive-Bayes¶

In [32]:
nb = MultinomialNB().fit(tfidf_tr, y_tr)
y_pr_nb_tr = nb.predict(tfidf_tr)
y_pr_nb_val = nb.predict(tfidf_val)
get_metrics(tfidf_tr, y_tr, tfidf_val, y_val, y_pr_nb_tr, y_pr_nb_val, nb)
Training F1 Score:  0.012195121951219513
Testing F1 Score:  0.0
Training Recall Score:  0.0061403508771929825
Testing Recall Score:  0.0
Training Precision Score:  0.875
Testing Precision Score:  0.0
Training Accuracy Score:  0.9428023807121961
Testing Accuracy Score:  0.9414968731087351
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/metrics/_classification.py:1344: UndefinedMetricWarning:

Precision is ill-defined and being set to 0.0 due to no predicted samples. Use `zero_division` parameter to control this behavior.

In [33]:
make_confusion_matrix(y_val, y_pr_nb_val)

Random forest classifier¶

In [34]:
rf = RandomForestClassifier(n_estimators=100).fit(tfidf_tr, y_tr)
y_pr_rf_tr = rf.predict(tfidf_tr)
y_pr_rf_val = rf.predict(tfidf_val)
get_metrics(tfidf_tr, y_tr, tfidf_val, y_val, y_pr_rf_tr, y_pr_rf_val, rf)
Training F1 Score:  0.9824715162138475
Testing F1 Score:  0.1891117478510029
Training Recall Score:  0.9833333333333333
Testing Recall Score:  0.11379310344827587
Training Precision Score:  0.9816112084063048
Testing Precision Score:  0.559322033898305
Training Accuracy Score:  0.9979824472914355
Testing Accuracy Score:  0.942909017550938
In [36]:
make_confusion_matrix(y_val, y_pr_rf_val)

Logistic Regression¶

In [37]:
log = LogisticRegression().fit(tfidf_tr, y_tr)
y_pr_log_tr = log.predict(tfidf_tr)
y_pr_log_val = log.predict(tfidf_val)
get_metrics(tfidf_tr, y_tr, tfidf_val, y_val, y_pr_log_tr, y_pr_log_val, log)
Training F1 Score:  0.25584795321637427
Testing F1 Score:  0.18487394957983194
Training Recall Score:  0.15350877192982457
Testing Recall Score:  0.11379310344827587
Training Precision Score:  0.7675438596491229
Testing Precision Score:  0.4925373134328358
Training Accuracy Score:  0.9486532835670332
Testing Accuracy Score:  0.9412951381884204
In [38]:
make_confusion_matrix(y_val, y_pr_log_val)

Balancing Dataset¶

In [39]:
df = pd.read_csv('balanced_data_combined.csv')
df.head()
Out[39]:
Unnamed: 0 text class
0 0 Drasko they didn't cook half a bird you idiot ... 1
1 1 Hopefully someone cooks Drasko in the next ep ... 1
2 2 of course you were born in serbia...you're as ... 1
3 3 These girls are the equivalent of the irritati... 1
4 4 RT @YesYoureRacist: At least you're only a tin... 1
In [40]:
df = df.drop(columns = 'Unnamed: 0')
In [41]:
df.shape
Out[41]:
(8337, 2)
In [42]:
df['class'].value_counts()
Out[42]:
class
1    4174
0    4163
Name: count, dtype: int64
In [43]:
df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 8337 entries, 0 to 8336
Data columns (total 2 columns):
 #   Column  Non-Null Count  Dtype 
---  ------  --------------  ----- 
 0   text    8335 non-null   object
 1   class   8337 non-null   int64 
dtypes: int64(1), object(1)
memory usage: 130.4+ KB
In [44]:
hate = len(df[df['class']==1])
not_hate = len(df[df['class']==0])

print(f'hate : {hate} , not_hate : {not_hate}')

dist = [graph_objs.Bar(
    x=['hate','not_hate'],
    y=[hate, not_hate],
)]
plotly.offline.iplot({'data': dist, 'layout': graph_objs.Layout(title='Class Distribution Visualisation')})
hate : 4174 , not_hate : 4163

Data Preprocessing¶

In [45]:
df.dropna(subset=['text'], inplace=True)
In [46]:
df = df.rename(columns={'text': 'tweet'})
preprocess_tweet(df, 'tweet')

Splitting testing and training set¶

In [47]:
#for ease of use, changing column name from class to target
df1 = df.rename(columns={'class': 'target'})
X = df1.tweet
y = df1.target

X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
In [48]:
train = pd.concat([X_tr, y_tr], axis=1).reset_index()
train = train.drop(columns=['index'], axis=1)
train.head()
Out[48]:
tweet target
0 im not sexist but when i see a lady driving a ... 1
1 ur a faggot if you change your name to your bd... 1
2 the early bearded man gets the clam herkfacts 0
3 i need a meme of thatthats my day in a nutshell 1
4 even nature looks awesome when colored in 0
In [49]:
val = pd.concat([X_val, y_val], axis=1).reset_index()
val = val.drop(columns=['index'], axis=1)

Stopword Removal, Tokenization, Stemming, and Lemmatization¶

In [50]:
stop_words = set(stopwords.words('english'))
stop_list = [''.join(c for c in s if c not in string.punctuation) for s in stop_words]
In [51]:
train.tweet = train.tweet.apply(lambda x: re.sub(r'\b\w{1,2}\b', '', str(x)))
val.tweet = val.tweet.apply(lambda x: re.sub(r'\b\w{1,2}\b', '', str(x)))

train_tokens = tokenize(train, 'tweet')
val_tokens = tokenize(val, 'tweet')
train_tokenz = no_stopwords(train_tokens)
val_tokenz = no_stopwords(val_tokens)
In [52]:
train_stem = stemming(train_tokenz)
val_stem = stemming(val_tokenz)
In [53]:
lemmatization(train)
lemmatization(val)
train.lem = train['lem'].apply(lambda x: ' '.join([item for item in x.split() if item not in stop_list]))
val.lem = val['lem'].apply(lambda x: ' '.join([item for item in x.split() if item not in stop_list]))

Visualizations¶

In [55]:
zero = train[train.target == 0]
one = train[train.target == 1]

zero_tokens = tokenize(zero, 'tweet')
one_tokens = tokenize(one, 'tweet')
zero_tokenz = no_stopwords(zero_tokens)
one_tokenz = no_stopwords(one_tokens)
In [56]:
plot_frequency_dist(one_tokenz, 'Hate')
In [57]:
plot_frequency_dist(zero_tokenz, 'Non-Hate')
In [58]:
#word cloud of hate tokens
plot_wordCloud(one_tokenz)
In [59]:
#word cloud of non-hate tokens
plot_wordCloud(zero_tokenz)
In [60]:
hate_list = np.setdiff1d(one_tokenz, zero_tokenz)
hate_list
Out[60]:
array(['aaaaaaaaand', 'aaand', 'aaronmacgruder', ..., 'zoes', 'zooming',
       'zzzzzz'], dtype='<U29')
In [61]:
hate_tokenz = [x for x in one_tokenz if x in hate_list]
In [62]:
print(f"Total no. of unique hate tokens: {len(hate_list)}")
print(f"Count of occurence of unique hate tokens in tweets: {len(hate_tokenz)}")
Total no. of unique hate tokens: 3538
Count of occurence of unique hate tokens in tweets: 7772
In [63]:
plot_frequency_dist(hate_tokenz, 'Unique hate')
/Users/manveerkaur/Documents/SentimentAnalysis/util.py:133: UserWarning:

Matplotlib is currently using module://matplotlib_inline.backend_inline, which is a non-GUI backend, so cannot show the figure.

In [64]:
# word cloud depicting occurence of unique hate tokens
plot_wordCloud(hate_tokenz)

Model building¶

In [65]:
X_tr = train.lem
X_val = val.lem
y_tr = train.target
y_val = val.target
In [66]:
vec = TfidfVectorizer()
tfidf_tr = vec.fit_transform(X_tr)
tfidf_val = vec.transform(X_val)

Multinomial Naive Bayes¶

In [67]:
nb = MultinomialNB().fit(tfidf_tr, y_tr)
y_pr_nb_tr = nb.predict(tfidf_tr)
y_pr_nb_val = nb.predict(tfidf_val)
get_metrics(tfidf_tr, y_tr, tfidf_val, y_val, y_pr_nb_tr, y_pr_nb_val, nb)
Training F1 Score:  0.9688663786682558
Testing F1 Score:  0.8943374197314654
Training Recall Score:  0.9742360695026963
Testing Recall Score:  0.9184652278177458
Training Precision Score:  0.9635555555555556
Testing Precision Score:  0.8714448236632537
Training Accuracy Score:  0.9686562687462508
Testing Accuracy Score:  0.8914217156568687
In [68]:
make_confusion_matrix(y_val, y_pr_nb_val)

Random Forest Classifier¶

In [69]:
rf = RandomForestClassifier(n_estimators=100).fit(tfidf_tr, y_tr)
y_pr_rf_tr = rf.predict(tfidf_tr)
y_pr_rf_val = rf.predict(tfidf_val)
get_metrics(tfidf_tr, y_tr, tfidf_val, y_val, y_pr_rf_tr, y_pr_rf_val, rf)
Training F1 Score:  0.9994010182689429
Testing F1 Score:  0.9126099706744868
Training Recall Score:  0.9997004194128221
Testing Recall Score:  0.9328537170263789
Training Precision Score:  0.9991017964071857
Testing Precision Score:  0.8932261768082663
Training Accuracy Score:  0.9994001199760048
Testing Accuracy Score:  0.910617876424715
In [70]:
make_confusion_matrix(y_val,y_pr_rf_val)

Logistic Regression¶

In [71]:
log = LogisticRegression().fit(tfidf_tr, y_tr)
y_pr_log_tr = log.predict(tfidf_tr)
y_pr_log_val = log.predict(tfidf_val)
get_metrics(tfidf_tr, y_tr, tfidf_val, y_val, y_pr_log_tr, y_pr_log_val, log)
Training F1 Score:  0.9637891520244461
Testing F1 Score:  0.9035087719298246
Training Recall Score:  0.944877171959257
Testing Recall Score:  0.8645083932853717
Training Precision Score:  0.9834736513875897
Testing Precision Score:  0.9461942257217848
Training Accuracy Score:  0.9644571085782844
Testing Accuracy Score:  0.907618476304739
In [72]:
make_confusion_matrix(y_val, y_pr_log_val)

Hyper-parameter tuning¶

In [73]:
from sklearn.model_selection import GridSearchCV
In [99]:
# Number of trees in random forrest
n_estimators = [int(x) for x in np.linspace(start = 50, stop = 200, num =5)]

# number of features to consider at each split
max_features = ['auto', 'sqrt']

# Max number of levels in tree
max_depth = [2,4]

# min number of samples required to splid the node
min_samples_split =[2,5]

# min number of samples required at each leaf node
min_samples_leaf =[1,2]

#The function to measure the quality of a split.
#criterion = ['gini', 'entropy', 'log_loss']
In [100]:
param_grid = {'n_estimators' : n_estimators,
             'max_features' : max_features,
             'max_depth' : max_depth,
             'min_samples_split' : min_samples_split,
             'min_samples_leaf' : min_samples_leaf}
             #'criterion': criterion}
             
param_grid
Out[100]:
{'n_estimators': [50, 87, 125, 162, 200],
 'max_features': ['auto', 'sqrt'],
 'max_depth': [2, 4],
 'min_samples_split': [2, 5],
 'min_samples_leaf': [1, 2]}
In [101]:
rf_momdel = RandomForestClassifier()
rf_grid = GridSearchCV(estimator = rf_momdel, param_grid = param_grid, cv = 3, verbose = 3, n_jobs = 4, scoring = 'recall' )
In [89]:
import warnings
In [90]:
warnings.filterwarnings('ignore')
In [102]:
rf_grid.fit(tfidf_tr, y_tr)
Fitting 3 folds for each of 80 candidates, totalling 240 fits
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
[CV 1/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.700 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.719 total time=   0.3s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.743 total time=   0.3s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.702 total time=   0.3s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.818 total time=   0.3s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.931 total time=   0.1s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.753 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.791 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.819 total time=   0.3s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.745 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.765 total time=   0.3s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.957 total time=   0.1s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.960 total time=   0.1s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.933 total time=   0.1s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.581 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.541 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.916 total time=   0.1s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.606 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.909 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.936 total time=   0.1s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.919 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.114 total time=   0.1s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.973 total time=   0.1s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.938 total time=   0.1s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.930 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.929 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.748 total time=   0.1s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.767 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.712 total time=   0.3s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.759 total time=   0.6s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.727 total time=   0.1s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.699 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.752 total time=   0.3s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.717 total time=   0.5s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.617 total time=   0.1s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.709 total time=   0.1s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.801 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.779 total time=   0.4s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.772 total time=   0.5s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.723 total time=   0.3s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.729 total time=   0.3s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.741 total time=   0.4s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.721 total time=   0.5s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.783 total time=   0.3s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.767 total time=   0.5s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.664 total time=   0.1s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.732 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.733 total time=   0.3s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.766 total time=   0.4s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.759 total time=   0.5s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.770 total time=   0.4s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.713 total time=   0.5s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.692 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.745 total time=   0.3s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.695 total time=   0.4s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.732 total time=   0.5s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.664 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.465 total time=   0.1s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.932 total time=   0.1s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.543 total time=   0.1s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.652 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.721 total time=   0.3s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.262 total time=   0.1s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.944 total time=   0.1s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.916 total time=   0.1s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.632 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.657 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.904 total time=   0.3s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.616 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.843 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.667 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.551 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.728 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.929 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.687 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.713 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.715 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.728 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.705 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.698 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.765 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.741 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.713 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.751 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.617 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.707 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.665 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.789 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.622 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.689 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.721 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.790 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.722 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.735 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.739 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.759 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.768 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.706 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.718 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.437 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.921 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.389 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.568 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.911 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.750 total time=   0.3s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.730 total time=   0.3s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.727 total time=   0.1s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.658 total time=   0.1s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.731 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.793 total time=   0.3s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.640 total time=   0.1s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.774 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.751 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.733 total time=   0.4s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.450 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.925 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.520 total time=   0.1s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.415 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.941 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.941 total time=   0.1s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.876 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.653 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.346 total time=   0.1s
[CV 1/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.429 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.907 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.681 total time=   0.3s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.728 total time=   0.4s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.738 total time=   0.5s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.714 total time=   0.6s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.729 total time=   0.4s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.721 total time=   0.5s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.781 total time=   0.6s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.747 total time=   0.3s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.715 total time=   0.4s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.729 total time=   0.5s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.740 total time=   0.3s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.770 total time=   0.5s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.694 total time=   0.1s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.779 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.743 total time=   0.3s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.767 total time=   0.4s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.713 total time=   0.1s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.688 total time=   0.1s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.674 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.730 total time=   0.4s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.757 total time=   0.5s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.685 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.742 total time=   0.3s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.746 total time=   0.4s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.751 total time=   0.5s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.753 total time=   0.4s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.763 total time=   0.5s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.648 total time=   0.1s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.701 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.663 total time=   0.3s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.944 total time=   0.1s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.631 total time=   0.1s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.913 total time=   0.2s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.702 total time=   0.3s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.938 total time=   0.1s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.918 total time=   0.1s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.655 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.763 total time=   0.3s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.407 total time=   0.1s
[CV 1/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.922 total time=   0.2s
[CV 2/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.891 total time=   0.2s
[CV 3/3] END criterion=entropy, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.663 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.777 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.696 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.702 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.718 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.709 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.713 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.696 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.747 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.730 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.606 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.728 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.694 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.758 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.696 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.468 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.953 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.683 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.763 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.703 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.958 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.942 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.720 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.731 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.590 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.513 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.709 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.778 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.742 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.352 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.933 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.898 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.974 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.531 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.562 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.906 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.961 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.952 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.606 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.595 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.269 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.902 total time=   0.1s
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
/Users/manveerkaur/miniconda3/envs/tensorflow/lib/python3.10/site-packages/sklearn/ensemble/_forest.py:424: FutureWarning: `max_features='auto'` has been deprecated in 1.1 and will be removed in 1.3. To keep the past behaviour, explicitly set `max_features='sqrt'` or remove this parameter as it is also the default value for RandomForestClassifiers and ExtraTreesClassifiers.
  warn(
Out[102]:
GridSearchCV(cv=3, estimator=RandomForestClassifier(), n_jobs=4,
             param_grid={'max_depth': [2, 4], 'max_features': ['auto', 'sqrt'],
                         'min_samples_leaf': [1, 2],
                         'min_samples_split': [2, 5],
                         'n_estimators': [50, 87, 125, 162, 200]},
             scoring='recall', verbose=3)
In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook.
On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.
GridSearchCV(cv=3, estimator=RandomForestClassifier(), n_jobs=4,
             param_grid={'max_depth': [2, 4], 'max_features': ['auto', 'sqrt'],
                         'min_samples_leaf': [1, 2],
                         'min_samples_split': [2, 5],
                         'n_estimators': [50, 87, 125, 162, 200]},
             scoring='recall', verbose=3)
RandomForestClassifier()
RandomForestClassifier()
In [103]:
rf_grid.best_params_
Out[103]:
{'max_depth': 2,
 'max_features': 'auto',
 'min_samples_leaf': 1,
 'min_samples_split': 2,
 'n_estimators': 125}
In [104]:
rf_clf_tunned = RandomForestClassifier(n_estimators = 125, max_depth = 2, max_features = 'auto', min_samples_split=2)
rf_clf_tunned.fit(tfidf_tr, y_tr)
t_rf_test_preds_lem = rf_clf_tunned.predict(tfidf_val)
In [105]:
t_rf_precision = precision_score(y_val, t_rf_test_preds_lem)
t_rf_recall = recall_score(y_val, t_rf_test_preds_lem)
t_rf_acc_score = accuracy_score(y_val, t_rf_test_preds_lem)
t_rf_f1_score = f1_score(y_val, t_rf_test_preds_lem)
print('Random Forest with Hyper Parameters selected with GridSearch:')

print('Precision: {:.4}'.format(t_rf_precision))
print('Recall: {:.4}'.format(t_rf_recall))

print("Testing Accuracy: {:.4}".format(t_rf_acc_score))
print("F1 Score: {:.4}".format(t_rf_f1_score))
Random Forest with Hyper Parameters selected with GridSearch:
Precision: 0.9165
Recall: 0.7638
Testing Accuracy: 0.847
F1 Score: 0.8332
In [106]:
fig, ax = plt.subplots(figsize=(6,6))
mat = confusion_matrix(y_val, t_rf_test_preds_lem)
sn.heatmap(mat.T, square=True, annot=True, fmt='d', cbar=False,
            xticklabels=['Not_Hate_Speech', 'Hate_Speech'], yticklabels=['Not_Hate_Speech', 'Hate_Speech'])
plt.xlabel('true label')
plt.ylabel('predicted label')
plt.show()
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.740 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.756 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.704 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.702 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.758 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.681 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.386 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.955 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.963 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.924 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.967 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.969 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.358 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.965 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.512 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.248 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.363 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.930 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.926 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.951 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.964 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.951 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.547 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.592 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.710 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.774 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.736 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.774 total time=   0.5s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.735 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.732 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.754 total time=   0.5s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.803 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.723 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.706 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.755 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.809 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.725 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.744 total time=   0.6s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.706 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.735 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.766 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.777 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.747 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.733 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.740 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.752 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.743 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.625 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.717 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.781 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.719 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.743 total time=   0.5s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.738 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.713 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.750 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.757 total time=   0.5s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.949 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.490 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.690 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.721 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.387 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.929 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.608 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.679 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.658 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.388 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.270 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.624 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.886 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.910 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.941 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.940 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.564 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.891 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.664 total time=   0.2s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.618 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.735 total time=   0.2s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.723 total time=   0.3s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.701 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.713 total time=   0.1s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.740 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.707 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.768 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.673 total time=   0.2s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.754 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.755 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.596 total time=   0.1s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.782 total time=   0.4s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.738 total time=   0.4s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.662 total time=   0.1s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.712 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.683 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.708 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.591 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.595 total time=   0.1s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.703 total time=   0.1s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.744 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.758 total time=   0.2s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.739 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.754 total time=   0.2s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.757 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.732 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.521 total time=   0.1s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.783 total time=   0.2s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.750 total time=   0.2s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.645 total time=   0.1s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.616 total time=   0.2s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.749 total time=   0.4s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.798 total time=   0.4s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.792 total time=   0.2s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.732 total time=   0.2s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.752 total time=   0.4s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.744 total time=   0.5s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.774 total time=   0.3s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.756 total time=   0.3s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.739 total time=   0.5s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.733 total time=   0.1s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.674 total time=   0.3s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.719 total time=   0.3s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.685 total time=   0.1s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.737 total time=   0.1s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.723 total time=   0.2s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.673 total time=   0.2s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.716 total time=   0.4s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.663 total time=   0.4s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.731 total time=   0.2s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.726 total time=   0.2s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.743 total time=   0.4s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.751 total time=   0.5s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.721 total time=   0.3s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.727 total time=   0.3s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.738 total time=   0.5s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.741 total time=   0.1s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.713 total time=   0.3s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.749 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.957 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.647 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.776 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.611 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.759 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.770 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.768 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.370 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.043 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.954 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.937 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.928 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.958 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.933 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.918 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.950 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.973 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.884 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.961 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.971 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.387 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.514 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.876 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.729 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.792 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.804 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.728 total time=   0.5s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.744 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.656 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.778 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.727 total time=   0.5s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.681 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.734 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.820 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.739 total time=   0.5s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.736 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.759 total time=   0.7s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.730 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.680 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.758 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.746 total time=   0.5s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.715 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.755 total time=   0.5s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.706 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.712 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.742 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.720 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.657 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.737 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.738 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.682 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.700 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.476 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.392 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.889 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.916 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.895 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.681 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.940 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.657 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.721 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.921 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.916 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.810 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.645 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.686 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.881 total time=   0.2s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.647 total time=   0.1s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.951 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.786 total time=   0.2s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.766 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.580 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.544 total time=   0.1s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.753 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.801 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.545 total time=   0.1s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.563 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.656 total time=   0.1s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.721 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.764 total time=   0.2s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.735 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.650 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.612 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.692 total time=   0.2s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.664 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.728 total time=   0.5s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.807 total time=   0.4s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.800 total time=   0.3s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.700 total time=   0.3s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.702 total time=   0.1s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.619 total time=   0.2s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.772 total time=   0.3s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.781 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.714 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.700 total time=   0.2s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.598 total time=   0.1s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.754 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.672 total time=   0.2s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.718 total time=   0.3s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.756 total time=   0.3s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.748 total time=   0.3s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.729 total time=   0.5s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.715 total time=   0.1s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.682 total time=   0.3s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.768 total time=   0.3s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.688 total time=   0.1s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.739 total time=   0.1s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.717 total time=   0.1s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.678 total time=   0.2s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.709 total time=   0.3s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.741 total time=   0.4s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.709 total time=   0.1s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.714 total time=   0.1s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.722 total time=   0.2s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.758 total time=   0.3s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.768 total time=   0.5s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.777 total time=   0.5s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.754 total time=   0.5s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.763 total time=   0.5s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.752 total time=   0.3s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.730 total time=   0.3s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.733 total time=   0.1s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.724 total time=   0.1s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.740 total time=   0.2s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.708 total time=   0.2s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.749 total time=   0.5s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.759 total time=   0.4s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.758 total time=   0.4s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.776 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.923 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.534 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.760 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.733 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.725 total time=   0.5s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.712 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.772 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.771 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.764 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.712 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.697 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.759 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.765 total time=   0.5s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.675 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.771 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.761 total time=   0.5s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.715 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.690 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.619 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.759 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.739 total time=   0.6s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.688 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.711 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.741 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.713 total time=   0.5s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.724 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.730 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.785 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.792 total time=   0.5s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.735 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.757 total time=   0.5s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.491 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.663 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.746 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.368 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.877 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.648 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.916 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.903 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.915 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.684 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.476 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.925 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.608 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.664 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.883 total time=   0.2s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.685 total time=   0.2s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.954 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.755 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.634 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.770 total time=   0.2s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.777 total time=   0.2s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.737 total time=   0.4s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.757 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.756 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.788 total time=   0.4s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.695 total time=   0.3s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.621 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.577 total time=   0.1s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.944 total time=   0.1s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.936 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.733 total time=   0.2s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.747 total time=   0.4s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.812 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.954 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.788 total time=   0.2s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.639 total time=   0.1s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.539 total time=   0.1s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.762 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.665 total time=   0.1s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.756 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.761 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.739 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.715 total time=   0.2s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.673 total time=   0.1s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.741 total time=   0.1s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.729 total time=   0.2s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.687 total time=   0.2s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.739 total time=   0.5s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.774 total time=   0.5s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.731 total time=   0.4s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.764 total time=   0.4s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.748 total time=   0.2s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.794 total time=   0.2s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.794 total time=   0.4s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.719 total time=   0.4s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.719 total time=   0.2s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.648 total time=   0.2s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.764 total time=   0.4s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.741 total time=   0.5s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.719 total time=   0.3s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.759 total time=   0.4s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.671 total time=   0.1s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.663 total time=   0.1s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.779 total time=   0.2s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.739 total time=   0.3s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.736 total time=   0.5s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.710 total time=   0.5s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.776 total time=   0.4s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.782 total time=   0.4s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.776 total time=   0.2s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.731 total time=   0.2s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.737 total time=   0.4s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.757 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.904 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.562 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.924 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.528 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.925 total time=   0.1s
[CV 3/3] END criterion=log_loss, max_depth=2, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.936 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.714 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.748 total time=   0.2s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.761 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.770 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.705 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.720 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.755 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.737 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.773 total time=   0.5s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.730 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.712 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.757 total time=   0.3s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.735 total time=   0.5s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.961 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.777 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.721 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.763 total time=   0.5s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.715 total time=   0.7s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.786 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.717 total time=   0.5s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.759 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.793 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.739 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.759 total time=   0.5s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.784 total time=   0.4s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.761 total time=   0.4s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.727 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.712 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.741 total time=   0.4s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.747 total time=   0.5s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.652 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.719 total time=   0.3s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.907 total time=   0.2s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.883 total time=   0.3s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.947 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.536 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.939 total time=   0.1s
[CV 2/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.630 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.710 total time=   0.2s
[CV 3/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.944 total time=   0.1s
[CV 1/3] END criterion=log_loss, max_depth=4, max_features=log2, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.702 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.637 total time=   0.1s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.756 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.686 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.739 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.785 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.783 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.750 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.676 total time=   0.1s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.742 total time=   0.3s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.775 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.728 total time=   0.2s
[CV 2/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.721 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.790 total time=   0.4s
[CV 1/3] END max_depth=2, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.747 total time=   0.4s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.704 total time=   0.3s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.717 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.704 total time=   0.3s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.621 total time=   0.1s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.779 total time=   0.2s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.735 total time=   0.2s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.695 total time=   0.1s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.716 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.738 total time=   0.2s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.672 total time=   0.2s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.662 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.597 total time=   0.1s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.713 total time=   0.1s
[CV 1/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.752 total time=   0.2s
[CV 2/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.738 total time=   0.3s
[CV 3/3] END max_depth=2, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.727 total time=   0.3s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.666 total time=   0.3s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=2, n_estimators=162;, score=0.762 total time=   0.4s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.680 total time=   0.1s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.708 total time=   0.1s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=87;, score=0.766 total time=   0.2s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=125;, score=0.775 total time=   0.3s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.739 total time=   0.5s
[CV 3/3] END max_depth=4, max_features=auto, min_samples_leaf=1, min_samples_split=5, n_estimators=200;, score=0.773 total time=   0.5s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.740 total time=   0.5s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=2, n_estimators=200;, score=0.764 total time=   0.5s
[CV 1/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.779 total time=   0.4s
[CV 2/3] END max_depth=4, max_features=auto, min_samples_leaf=2, min_samples_split=5, n_estimators=162;, score=0.797 total time=   0.4s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=50;, score=0.707 total time=   0.1s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=87;, score=0.712 total time=   0.2s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.742 total time=   0.3s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=125;, score=0.757 total time=   0.3s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=2, n_estimators=200;, score=0.749 total time=   0.5s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=50;, score=0.752 total time=   0.1s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.740 total time=   0.4s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=1, min_samples_split=5, n_estimators=162;, score=0.752 total time=   0.4s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=50;, score=0.712 total time=   0.1s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=87;, score=0.692 total time=   0.2s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=125;, score=0.716 total time=   0.3s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=2, n_estimators=162;, score=0.737 total time=   0.4s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.729 total time=   0.1s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=50;, score=0.691 total time=   0.1s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=87;, score=0.693 total time=   0.2s
[CV 1/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=125;, score=0.693 total time=   0.3s
[CV 2/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.750 total time=   0.4s
[CV 3/3] END max_depth=4, max_features=sqrt, min_samples_leaf=2, min_samples_split=5, n_estimators=200;, score=0.770 total time=   0.4s
In [ ]: